From: Dario Faggioli
Date: Tue, 14 Apr 2015 12:56:13 +0000 (+0200)
Subject: rework locking for dump of scheduler info (debug-key r)
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~3443
X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/%22/%22http:/www.example.com/cgi/%22?a=commitdiff_plain;h=b7c82abc1a920bcccbd64abe4c388f7208a2c185;p=xen.git

rework locking for dump of scheduler info (debug-key r)

such that it is taken care of by the various schedulers, rather than
happening in schedule.c. In fact, it is the schedulers that know best
which locks are necessary for the specific dumping operations.

While there, fix a few style issues (indentation, trailing whitespace,
parentheses, and blank lines after variable declarations).

Signed-off-by: Dario Faggioli
Reviewed-by: George Dunlap
---

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index bec67ff362..953ecb0e5b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -25,6 +25,23 @@
 #include

+/*
+ * Locking:
+ *  - Scheduler-lock (a.k.a. runqueue lock):
+ *    + is per-runqueue, and there is one runqueue per-cpu;
+ *    + serializes all runqueue manipulation operations;
+ *  - Private data lock (a.k.a. private scheduler lock):
+ *    + serializes accesses to the scheduler global state (weight,
+ *      credit, balance_credit, etc);
+ *    + serializes updates to the domains' scheduling parameters.
+ *
+ * Ordering is "private lock always comes first":
+ *  + if we need both locks, we must acquire the private
+ *    scheduler lock first;
+ *  + if we already own a runqueue lock, we must never acquire
+ *    the private scheduler lock.
+ */
+
 /*
  * Basic constants
  */
@@ -1750,11 +1767,24 @@ static void
 csched_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct list_head *runq, *iter;
+    struct csched_private *prv = CSCHED_PRIV(ops);
     struct csched_pcpu *spc;
     struct csched_vcpu *svc;
+    spinlock_t *lock = lock;
+    unsigned long flags;
     int loop;
 #define cpustr keyhandler_scratch

+    /*
+     * We need both locks:
+     *  - csched_dump_vcpu() wants to access domains' scheduling
+     *    parameters, which are protected by the private scheduler lock;
+     *  - we scan through the runqueue, so we need the proper runqueue
+     *    lock (the one of the runqueue of this cpu).
+     */
+    spin_lock_irqsave(&prv->lock, flags);
+    lock = pcpu_schedule_lock(cpu);
+
     spc = CSCHED_PCPU(cpu);
     runq = &spc->runq;
@@ -1781,6 +1811,9 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
             csched_dump_vcpu(svc);
         }
     }
+
+    pcpu_schedule_unlock(lock, cpu);
+    spin_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }

@@ -1792,7 +1825,7 @@ csched_dump(const struct scheduler *ops)
     int loop;
     unsigned long flags;

-    spin_lock_irqsave(&(prv->lock), flags);
+    spin_lock_irqsave(&prv->lock, flags);

 #define idlers_buf keyhandler_scratch

@@ -1835,15 +1868,20 @@ csched_dump(const struct scheduler *ops)
         list_for_each( iter_svc, &sdom->active_vcpu )
         {
             struct csched_vcpu *svc;
+            spinlock_t *lock;
+
             svc = list_entry(iter_svc, struct csched_vcpu, active_vcpu_elem);
+            lock = vcpu_schedule_lock(svc->vcpu);

             printk("\t%3d: ", ++loop);
             csched_dump_vcpu(svc);
+
+            vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
 #undef idlers_buf

-    spin_unlock_irqrestore(&(prv->lock), flags);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }

 static int
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 90f85a2598..b501f28639 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -51,8 +51,6 @@
  * credit2 wiki page:
  *  http://wiki.xen.org/wiki/Credit2_Scheduler_Development
  * TODO:
- * + Immediate bug-fixes
- *   - Do per-runqueue, grab proper lock for dump debugkey
  * + Multiple sockets
  *   - Detect cpu layout and make runqueue map, one per L2 (make_runq_map())
  *   - Simple load balancer / runqueue assignment
@@ -1832,12 +1830,24 @@ csched2_dump_vcpu(struct csched2_vcpu *svc)
 static void
 csched2_dump_pcpu(const struct scheduler *ops, int cpu)
 {
+    struct csched2_private *prv = CSCHED2_PRIV(ops);
     struct list_head *runq, *iter;
     struct csched2_vcpu *svc;
+    unsigned long flags;
+    spinlock_t *lock;
     int loop;
     char cpustr[100];

-    /* FIXME: Do locking properly for access to runqueue structures */
+    /*
+     * We need both locks:
+     *  - csched2_dump_vcpu() wants to access domains' scheduling
+     *    parameters, which are protected by the private scheduler lock;
+     *  - we scan through the runqueue, so we need the proper runqueue
+     *    lock (the one of the runqueue this cpu is associated to).
+     */
+    spin_lock_irqsave(&prv->lock, flags);
+    lock = per_cpu(schedule_data, cpu).schedule_lock;
+    spin_lock(lock);

     runq = &RQD(ops, cpu)->runq;
@@ -1864,6 +1874,9 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
             csched2_dump_vcpu(svc);
         }
     }
+
+    spin_unlock(lock);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }

 static void
@@ -1871,8 +1884,13 @@ csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
     struct csched2_private *prv = CSCHED2_PRIV(ops);
+    unsigned long flags;
     int i, loop;

+    /* We need the private lock as we access global scheduler data
+     * and (below) the list of active domains. */
+    spin_lock_irqsave(&prv->lock, flags);
+
     printk("Active queues: %d\n"
            "\tdefault-weight = %d\n",
            cpumask_weight(&prv->active_queues),
@@ -1895,7 +1913,6 @@ csched2_dump(const struct scheduler *ops)
                fraction);
     }

-    /* FIXME: Locking! */
     printk("Domain info:\n");
     loop = 0;
@@ -1904,20 +1921,27 @@ csched2_dump(const struct scheduler *ops)
         struct csched2_dom *sdom;
         sdom = list_entry(iter_sdom, struct csched2_dom, sdom_elem);

-        printk("\tDomain: %d w %d v %d\n\t",
-               sdom->dom->domain_id,
-               sdom->weight,
-               sdom->nr_vcpus);
+        printk("\tDomain: %d w %d v %d\n\t",
+               sdom->dom->domain_id,
+               sdom->weight,
+               sdom->nr_vcpus);

         list_for_each( iter_svc, &sdom->vcpu )
         {
             struct csched2_vcpu *svc;
+            spinlock_t *lock;
+
             svc = list_entry(iter_svc, struct csched2_vcpu, sdom_elem);
+            lock = vcpu_schedule_lock(svc->vcpu);

             printk("\t%3d: ", ++loop);
             csched2_dump_vcpu(svc);
+
+            vcpu_schedule_unlock(lock, svc->vcpu);
         }
     }
+
+    spin_unlock_irqrestore(&prv->lock, flags);
 }

 static void activate_runqueue(struct csched2_private *prv, int rqi)
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 2b0b7c6f8a..7c39a9e870 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -253,9 +253,12 @@ rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
 static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
-    struct rt_vcpu *svc = rt_vcpu(curr_on_cpu(cpu));
+    struct rt_private *prv = rt_priv(ops);
+    unsigned long flags;

-    rt_dump_vcpu(ops, svc);
+    spin_lock_irqsave(&prv->lock, flags);
+    rt_dump_vcpu(ops, rt_vcpu(curr_on_cpu(cpu)));
+    spin_unlock_irqrestore(&prv->lock, flags);
 }

 static void
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index c4f4b601f5..a1a4cb7287 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1206,12 +1206,25 @@ static void sedf_dump_domain(struct vcpu *d)
 /* Dumps all domains on the specified cpu */
 static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
 {
+    struct sedf_priv_info *prv = SEDF_PRIV(ops);
     struct list_head *list, *queue, *tmp;
     struct sedf_vcpu_info *d_inf;
     struct domain *d;
     struct vcpu *ed;
+    spinlock_t *lock;
+    unsigned long flags;
     int loop = 0;

+    /*
+     * We need both locks, as:
+     *  - we access domains' parameters, which are protected by the
+     *    private scheduler lock;
+     *  - we scan through the various queues, so we need the proper
+     *    runqueue lock (i.e., the one for this pCPU).
+     */
+    spin_lock_irqsave(&prv->lock, flags);
+    lock = pcpu_schedule_lock(i);
+
     printk("now=%"PRIu64"\n",NOW());
     queue = RUNQ(i);
     printk("RUNQ rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
@@ -1275,6 +1288,9 @@ static void sedf_dump_cpu_state(const struct scheduler *ops, int i)
         }
     }
     rcu_read_unlock(&domlist_read_lock);
+
+    pcpu_schedule_unlock(lock, i);
+    spin_unlock_irqrestore(&prv->lock, flags);
 }

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 88770c6ce7..f5a2e55a59 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1515,6 +1515,8 @@ void schedule_dump(struct cpupool *c)
     struct scheduler *sched;
     cpumask_t *cpus;

+    /* Locking, if necessary, must be handled within each scheduler */
+
     sched = (c == NULL) ? &ops : c->sched;
     cpus = cpupool_scheduler_cpumask(c);
     printk("Scheduler: %s (%s)\n", sched->name, sched->opt_name);
@@ -1522,11 +1524,8 @@ void schedule_dump(struct cpupool *c)

     for_each_cpu (i, cpus)
     {
-        spinlock_t *lock = pcpu_schedule_lock(i);
-
         printk("CPU[%02d] ", i);
         SCHED_OP(sched, dump_cpu_state, i);
-        pcpu_schedule_unlock(lock, i);
     }
 }
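
As an aside for readers following the locking comment introduced in sched_credit.c: the discipline being documented is a fixed two-level lock order, private scheduler lock first, per-CPU runqueue lock second. The snippet below is only a standalone illustration of that order; it uses plain POSIX threads, and the names (priv_lock, rq_lock, dump_cpu, enqueue_work) and the toy state are made up for the example, not Xen code or Xen APIs. It builds with "gcc -pthread".

/*
 * Standalone sketch of the lock ordering described above: the "private"
 * lock (global scheduler state) is always taken before any per-CPU
 * "runqueue" lock, and a path that already holds a runqueue lock never
 * tries to take the private lock, so dump and hot paths cannot deadlock.
 * Hypothetical example code, not Xen's locking primitives.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER; /* global state  */
static pthread_mutex_t rq_lock[NR_CPUS];                      /* per runqueue  */
static int rq_len[NR_CPUS];                                   /* toy runq data */
static int default_weight = 256;                              /* toy global    */

/* Dump path: needs both locks, so the private lock is acquired first. */
static void dump_cpu(int cpu)
{
    pthread_mutex_lock(&priv_lock);    /* 1st: private scheduler lock    */
    pthread_mutex_lock(&rq_lock[cpu]); /* 2nd: this CPU's runqueue lock  */
    printf("CPU[%02d] runq_len=%d default_weight=%d\n",
           cpu, rq_len[cpu], default_weight);
    pthread_mutex_unlock(&rq_lock[cpu]);
    pthread_mutex_unlock(&priv_lock);
}

/*
 * Hot path: touches only its own runqueue, so it takes just that lock
 * and, per the ordering rule, never acquires priv_lock while holding it.
 */
static void *enqueue_work(void *arg)
{
    int cpu = (int)(long)arg;
    int i;

    for ( i = 0; i < 1000; i++ )
    {
        pthread_mutex_lock(&rq_lock[cpu]);
        rq_len[cpu]++;
        pthread_mutex_unlock(&rq_lock[cpu]);
    }
    return NULL;
}

int main(void)
{
    pthread_t t[NR_CPUS];
    int cpu;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        pthread_mutex_init(&rq_lock[cpu], NULL);
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        pthread_create(&t[cpu], NULL, enqueue_work, (void *)(long)cpu);
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        pthread_join(t[cpu], NULL);
    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        dump_cpu(cpu);
    return 0;
}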